//
// Copyright 2018 SenX S.A.S.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// D I S T R I B U T E D
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// FYI you can reference attributes to avoid duplication and make configuration easy to update
//
// kafka.data.topic = data
// ingress.kafka.data.topic = ${kafka.data.topic}
//
//
// Time units of the platform
// ns means we store nanoseconds
// us means we store microseconds
// ms means we store milliseconds
//
warp.timeunits = us
//
// Comma separated list of Warp 10 plugins to instantiate.
//
#warp10.plugins = packageW.classX,packageY.classZ
//
// Secret for REPORT. If unset, a random secret will be generated and written to the logs and to stdout.
//
#warp10.report.secret =
//
// Specific plugin loading
//
#warp10.plugin.xxx = package.class
// Integrated plugins:
// Quantum MUST be loaded using this line to make sure its jar is automatically added to the classpath
#warp10.plugin.quantum = io.warp10.plugins.quantum.QuantumPlugin
#warp10.plugin.http = io.warp10.plugins.http.HTTPWarp10Plugin
#warp10.plugin.udp = io.warp10.plugins.udp.UDPWarp10Plugin
#warp10.plugin.tcp = io.warp10.plugins.tcp.TCPWarp10Plugin
#warp10.plugin.influxdb = io.warp10.plugins.influxdb.InfluxDBWarp10Plugin
//
// Comma separated list of headers to return in Access-Control-Allow-Headers on top of the token header.
// This applies to all HTTP endpoints.
//
#cors.headers =
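//
// Example (illustrative header names):
// cors.headers = Content-Type,X-Requested-With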
//
// Maximum size for encoders exchanged within Warp 10.
// Make sure this value is less than the maximum size of
// a Kafka message.
//
max.encoder.size = 100000
//
// Comma separated list of components to run in distributed mode
// Valid values are 'ingress', 'directory', 'store', 'egress', 'fetch', 'plasmaFE', 'plasmaBE', 'runner'
//
warp.components =
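//
// For example, to run only the Ingress, Store and Egress components in this instance:
// warp.components = ingress,store,egress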
//
// 128 bits key for verifying class names, format is hex:hhhhh...hhhh
//
warp.hash.class = hex:hhhhhh...
//
// 128 bits key for verifying labels, format is hex:hhhhh...hhhh
//
warp.hash.labels = hex:hhhhhh...
//
// 128 bits key for verifying index names, format is hex:hhhhh...hhhh
//
warp.hash.index = hex:hhhhhh...
//
// 128 bits key for verifying tokens, format is hex:hhhhh...hhhh
//
warp.hash.token = hex:hhhhhh...
//
// 128 bits key for verifying app names, format is hex:hhhhh...hhhh
//
warp.hash.app = hex:hhhhhh...
//
// 256 bits key for protecting tokens, format is hex:hhhh...hhhh
//
warp.aes.token = hex:hhhhhh...
//
// 256 bits key to generate secure scripts
//
warp.aes.scripts = hex:hhhhhh...
//
// AES key to wrap metasets
//
warp.aes.metasets = hex:hhhhhhhh....
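//
// For reference, keys of the proper length can be generated with OpenSSL and prefixed with 'hex:', e.g.
//   openssl rand -hex 16   (128 bits)
//   openssl rand -hex 32   (256 bits)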
//
// 256 bits key for protecting log messages, format is hex:hhhhh...hhhh
//
#warp.aes.logging = hex:hhhhhh...
//
// How often (in ms) should we refetch the region start/end keys
//
#warp.hbase.regionkeys.updateperiod =
//
// String returned by the IDENT function
//
#warp.ident =
//
// OSS Master Key, used to decrypt any 'wrapped:base64' key
//
#oss.master.key = hex:hhhhhh...
//
// Token file
//
#warp.token.file =
//
// Root directory where trl files are stored
//
#warp.trl.dir =
//
// Pre-Shared key for signing fetch requests. Signed fetch requests expose the owner/producer.
//
#fetch.psk =
//
// Set to true to disable plasma
//
#warp.plasma.disable =
//
// Set to true to disable mobius
//
#warp.mobius.disable =
//
// Set to true to disable streaming updates
//
#warp.streamupdate.disable =
/////////////////////////////////////////////////////////////////////////////////////////
//
// W A R P S C R I P T
//
/////////////////////////////////////////////////////////////////////////////////////////
// Maximum time that TIMEBOX can wait for an execution (in ms), defaults to 30s
#warpscript.timebox.maxtime=30000
// Path of the 'bootstrap' WarpScript code for the interactive mode
#warpscript.interactive.bootstrap.path =
// How often to reload the bootstrap code (in ms) for the interactive mode
#warpscript.interactive.bootstrap.period =
// Maximum number of parallel interactive sessions (defaults to 1).
#warpscript.interactive.capacity =
// Port on which the TCP endpoint of the interactive mode will listen. If undefined, no TCP endpoint will be available.
#warpscript.interactive.tcp.port =
// Set to 'true' to disable the interactive mode completely
#warp.interactive.disable = true
//
// Number of registers to allocate in the stack defaults to 256
//
#warpscript.registers = 256
//
// This configuration parameter determines if undefining a function (via NULL 'XXX' DEF)
// will unshadow the original statement thus making it available again or if it will replace
// it with a function that will fail with a message saying the function is undefined.
// The safest behavior is to leave this undefined or set to 'false'.
//
warpscript.def.unshadow = false
//
// Jar Repository - for Warp10 UDF only
//
// The path to a directory containing jar files from which UDFs can be loaded
#warpscript.jars.directory =
// How often (in ms) should Warp 10 rescan the above directory
#warpscript.jars.refresh = 60000
// Set to 'true' to allow loading the UDFs from the classpath when no directory is explicitly defined
#warpscript.jars.fromclasspath =
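//
// Example (hypothetical paths):
// warpscript.jars.directory = /opt/warp10/udf-jars
// warpscript.jars.refresh = 30000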
//
// Macro Repository
//
// Directory which contains subdirectories with macros
#warpscript.repository.directory =
// How often (in ms) should the above directory be rescanned
#warpscript.repository.refresh = 60000
// Set to false to disable on demand loading of macros not yet loaded.
#warpscript.repository.ondemand = false
// Default TTL for macros loaded on demand
#warpscript.repository.ttl = 600000
// TTL to use for failed macros, a new on-demand loading will occur after this delay.
// Defaults to the max of 10s or half of warpscript.repository.refresh
#warpscript.repository.ttl.failed =
// Maximum TTL that can be set using MACROTTL (defaults to 2**62 ms)
#warpscript.repository.ttl.hard =
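//
// Example (hypothetical layout): with the settings below, a macro stored as
// /opt/warp10/macros/utils/clean.mc2 is expected to be callable as @utils/clean
// warpscript.repository.directory = /opt/warp10/macros
// warpscript.repository.refresh = 30000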
//
// Default maximum number of operations a single WarpScript execution can do
//
warpscript.maxops = 1000
warpscript.maxops.hard = 2000
//
// Maximum number of buckets which can result of a call to BUCKETIZE
// Can be modified by MAXBUCKETS up to the hard limit below
//
warpscript.maxbuckets = 1000000
warpscript.maxbuckets.hard = 100000
//
// Maximum number of cells in geographic shapes
// Can be modified by MAXGEOCELLS up to the hard limit below
//
warpscript.maxgeocells = 10000
warpscript.maxgeocells.hard = 100000
//
// Maximum depth of the stack
// Can be modified by MAXDEPTH up to the hard limit below
//
warpscript.maxdepth = 1000
warpscript.maxdepth.hard = 1000
//
// Maximum number of datapoint which can be fetched during a WarpScript execution
// Can be modified by LIMIT up to the hard limit below
//
warpscript.maxfetch = 100000
warpscript.maxfetch.hard = 1000000
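//
// From WarpScript these soft limits can be raised up to their hard limit, e.g. to fetch more datapoints:
//   500000 LIMIT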
//
// Maximum number of GTS which can be retrieved from Directory during a WarpScript execution
// Can be modified by MAXGTS up to the hard limit below
//
warpscript.maxgts = 100000
warpscript.maxgts.hard = 100000
//
// Maximum time (in ms) that can be spent in a loop
// Can be modified by MAXLOOP up to the hard limit below
//
warpscript.maxloop = 5000
warpscript.maxloop.hard = 10000
//
// Maximum levels of recursion in macro calls
//
warpscript.maxrecursion = 16
warpscript.maxrecursion.hard = 32
//
// Maximum number of symbols which can be defined by a single WarpScript
// Can be modified by MAXSYMBOLS up to the hard limit below
warpscript.maxsymbols = 64
warpscript.maxsymbols.hard = 256
//
// Maximum number of pixels per image
//
warpscript.maxpixels = 1000000
warpscript.maxpixels.hard = 1000000
//
// Maximum number of WEBCALL invocations allowed during a single WarpScript execution
//
warpscript.maxwebcalls = 4
//
// CALL root directory property
//
#warpscript.call.directory =
//
// Path of the 'bootstrap' WarpScript code
//
//warpscript.bootstrap.path = /opt/warp10/etc/hello.mc2
//
// How often to reload the bootstrap code (in ms)
//
//warpscript.bootstrap.period = 120000
//
// URL for the 'update' endpoint
//
warpscript.update.endpoint = http://127.0.0.1:8080/api/v0/update
//
// URL for the 'meta' endpoint
//
warpscript.meta.endpoint = http://127.0.0.1:8080/api/v0/meta
//
// URL for the 'delete' endpoint
//
warpscript.delete.endpoint = http://127.0.0.1:8080/api/v0/delete
//
// Maximum number of subprogram instances which can be spawned
//
//warpscript.call.maxcapacity = 1
//
// Comma separated list of WarpScriptExtension classes to instantiate to modify the defined WarpScript functions.
// Extension classes can be prefixed with SORTKEY# to force the order in which the extensions will be loaded.
// The SORTKEY# prefix will be used only for sorting.
//
#warpscript.extensions = packageW.classX,packageY.classZ
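//
// Example using sort keys to force load order (hypothetical classes):
// warpscript.extensions = 00#packageW.classX,01#packageY.classZ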
//
// Specific extension loading
// Extension classes can be prefixed with SORTKEY# to force the order in which the extensions will be loaded.
// The SORTKEY# prefix will be used only for sorting.
//
#warpscript.extension.xxx = package.class
// Integrated extensions:
// CEVAL, SYNC
#warpscript.extension.concurrent = io.warp10.script.ext.concurrent.ConcurrentWarpScriptExtension
// LOGMSG, NOLOG, STDERR, STDOUT, TDESCRIBE
#warpscript.extension.debug = io.warp10.script.ext.debug.DebugWarpScriptExtension
// HLOCATE
#warpscript.extension.hbase = io.warp10.script.ext.hbase.HBaseWarpScriptExtension
// FUNCTIONS
#warpscript.extension.inventory = io.warp10.script.ext.inventory.InventoryWarpScriptExtension
// LOGEVENTTO
#warpscript.extension.logging = io.warp10.script.ext.logging.LoggingWarpScriptExtension
// REXEC
#warpscript.extension.rexec = io.warp10.script.ext.rexec.RexecWarpScriptExtension
// Comma separated list of included and excluded (! prefixed) host patterns
#warpscript.rexec.endpoint.patterns = .*
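// Example (hypothetical hosts): allow example.com endpoints but exclude an internal one
// warpscript.rexec.endpoint.patterns = .*\.example\.com.*,!.*internal\.example\.com.*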
// SENSISIONEVENT, SENSISIONGET, SENSISIONSET, SENSISIONUPDATE
#warpscript.extension.sensision = io.warp10.script.ext.sensision.SensisionWarpScriptExtension
// MUTEX, SHMLOAD, SHMSTORE
#warpscript.extension.shm = io.warp10.script.ext.shm.SharedMemoryWarpScriptExtension
// TOKENDUMP, TOKENGEN
#warpscript.extension.token = io.warp10.script.ext.token.TokenWarpScriptExtension
// URLFETCH, MAXURLFETCHCOUNT, MAXURLFETCHSIZE
#warpscript.extension.urlfetch = io.warp10.script.ext.urlfetch.UrlFetchWarpScriptExtension
//
// Specific namespace under which to load an extension. The specified namespace will be used as a prefix for all functions of the extension.
//
//warpscript.namespace.package.class = namespace.
//
// Path of the 'bootstrap' WarpScript code for Mobius
//
//warpscript.mobius.bootstrap.path =
//
// Number of threads in the Mobius pool (those threads are used to execute the mobius macros)
//
warpscript.mobius.pool = 16
//
// How often to reload the bootstrap code (in ms) for Mobius
//
//warpscript.mobius.bootstrap.period =
//
// Path of the 'bootstrap' WarpScript code for Runner
//
//warpscript.runner.bootstrap.path =
//
// How often to reload the bootstrap code (in ms) for Runner
//
//warpscript.runner.bootstrap.period =
/////////////////////////////////////////////////////////////////////////////////////////
//
// W E B C A L L
//
/////////////////////////////////////////////////////////////////////////////////////////
webcall.user.agent = Warp10-WebCall
//
// List of patterns to include/exclude for hosts in WebCall calls
//
// Typical value is .*,!^127.0.0.1$,!^localhost$,!^192.168.*,!^10.*,!^172.(16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31)\..*
// Defaults to .*
//
webcall.host.patterns = !.*
//
// ZK Quorum to use for reaching the Kafka cluster to consume WebCall requests
//
webcall.kafka.zkconnect =
//
// List of Kafka brokers to use for sending WebCall requests
//
webcall.kafka.brokerlist =
//
// Topic to use for WebCall requests
//
webcall.kafka.topic =
//
// AES key to use for encrypting WebCall requests
//
//webcall.kafka.aes = hex:......
//
// SipHash key to use for computing WebCall requests HMACs
//
//webcall.kafka.mac = hex:.....
//
// Kafka client id to use when consuming WebCall requests
//
//webcall.kafka.consumer.clientid =
//
// Kafka client id to use when producing WebCall requests
//
//webcall.kafka.producer.clientid =
//
// How many threads to spawn
//
webcall.nthreads = 4
//
// Groupid to use when consuming Kafka
//
//webcall.kafka.groupid =
//
// How often to commit the Kafka offsets
//
webcall.kafka.commitperiod = 60000
//
// Name of partition assignment strategy to use
//
//webcall.kafka.consumer.partition.assignment.strategy =
/////////////////////////////////////////////////////////////////////////////////////////
//
// D I R E C T O R Y
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Activity period (in ms) to consider when flushing Metadata to HBase
// Should match ingress.activity.window unless you know what you are doing...
//
#directory.activity.window = 3600000
//
// Comma separated list of Directory related HBase configuration keys. Each key will
// be set in the HBase configuration by assigning the value defined in the Warp 10 config
// under the key 'directory.<HBASE_CONFIGURATION_KEY>'. Each listed HBase configuration key
// MUST have a value defined in the 'directory.' prefixed configuration parameter.
//
#directory.hbase.config =
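//
// Example (hypothetical values):
// directory.hbase.config = hbase.rpc.timeout,hbase.client.retries.number
// directory.hbase.rpc.timeout = 60000
// directory.hbase.client.retries.number = 3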
//
// Maximum number of classes for which to report detailed stats in 'FINDSTATS'
//
directory.stats.class.maxcardinality = 100
//
// Maximum number of labels for which to report detailed stats in 'FINDSTATS'
//
directory.stats.labels.maxcardinality = 100
//
// Maximum size of Thrift frame for directory service
//
directory.frame.maxlen = 0
//
// Maximum number of Metadata to return in find responses
//
directory.find.maxresults = 100000
//
// Hard limit on the number of find results. Beyond this limit, the find request will fail.
//
directory.find.maxresults.hard = 100000
//
// Zookeeper ZK connect string for Kafka ('metadata' topic)
//
directory.kafka.metadata.zkconnect = 127.0.0.1:2181/zk/kafka/localhost
//
// Actual 'metadata' topic
//
directory.kafka.metadata.topic = metadata
//
// Key to use for computing MACs (128 bits in hex or OSS reference)
//
directory.kafka.metadata.mac = hex:hhhhhh...
//
// Key to use for encrypting payloads (128/192/256 bits in hex or OSS reference)
//
directory.kafka.metadata.aes = hex:hhhhhh...
//
// Key to use for encrypting metadata in HBase (128/192/256 bits in hex or OSS reference)
//
directory.hbase.metadata.aes = hex:hhhhhh...
//
// Kafka group id with which to consume the metadata topic
//
directory.kafka.metadata.groupid = directory.metadata-localhost
//
// Delay between synchronization for offset commit
//
directory.kafka.metadata.commitperiod = 1000
//
// Maximum byte size we allow the pending Puts list to grow to
//
directory.hbase.metadata.pendingputs.size = 1000000
//
// ZooKeeper Quorum for locating HBase
//
directory.hbase.metadata.zkconnect = 127.0.0.1:2181
//
// HBase table where metadata should be stored
//
directory.hbase.metadata.table = continuum
//
// Columns family under which metadata should be stored
//
directory.hbase.metadata.colfam = m
//
// Parent znode under which HBase znodes will be created
//
directory.hbase.metadata.znode = /zk/hbase/localhost
//
// ZooKeeper server list for registering
//
directory.zk.quorum = 127.0.0.1:2181
//
// ZooKeeper znode under which to register
//
directory.zk.znode = /zk/warp/localhost/services
//
// Number of threads to run for ingesting metadata from Kafka
//
directory.kafka.nthreads = 1
//
// Number of threads to run for serving directory requests
//
directory.service.nthreads = 12
//
// Partition of Metadata this Directory instance is responsible for, format is MODULUS:REMAINDER
//
directory.partition = 1:0
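//
// For example, with two Directory instances, the first could use 2:0 and the second 2:1
// directory.partition = 2:0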
//
// Port on which the DirectoryService will listen
//
directory.port = 8883
//
// Port the streaming directory service listens to
//
directory.streaming.port = 8885
//
// Number of Jetty selectors for the streaming server
//
directory.streaming.selectors = 4
//
// Number of Jetty acceptors for the streaming server
//
directory.streaming.acceptors = 2
//
// Idle timeout for the streaming directory endpoint
//
directory.streaming.idle.timeout = 300000
//
// Number of threads in Jetty's Thread Pool
//
directory.streaming.threadpool = 200
//
// Maximum size of Jetty ThreadPool queue size (unbounded by default)
//
directory.streaming.maxqueuesize = 400
//
// Jetty attributes for the streaming directory
//
#directory.streaming.jetty.attribute.XXX = value
// Example: increase the maximum acceptable form request size, to support large selector regexps
#directory.streaming.jetty.attribute.org.eclipse.jetty.server.Request.maxFormContentSize = 10000000
//
// Address on which the DirectoryService will listen
//
directory.host = 127.0.0.1
//
// Pre-Shared Key (128 bits) for request fingerprinting
//
directory.psk = hex:hhhhhh...
//
// Max age of Find requests
//
directory.maxage = 1800000
//
// Number of threads to use for the initial loading of Metadata
//
directory.init.nthreads = 1
//
// Boolean indicating whether or not we should initialize Directory by reading HBase
//
directory.init = true
//
// Boolean indicating whether or not we should store in HBase metadata we get from Kafka
//
directory.store = true
//
// Boolean indicating whether or not we should do deletions in HBase
//
directory.delete = true
//
// Boolean indicating whether or not we should register in ZK
//
directory.register = true
//
// Class name of directory plugin to use
//
#directory.plugin.class =
//
// Boolean indicating whether or not we should use the HBase filter when initializing
//
directory.hbase.filter = false
//
// Kafka client id to use for consuming the metadata topic
//
//directory.kafka.metadata.consumer.clientid =
//
// ZooKeeper port for HBase client
//
# directory.hbase.zookeeper.property.clientPort =
//
// Strategy to adopt if consuming for the first time or if the last committed offset is past Kafka history
//
#directory.kafka.metadata.consumer.auto.offset.reset =
//
// Kafka client.id to use for the metadata topic consumer
//
#directory.kafka.metadata.consumer.clientid =
//
// Name of partition assignment strategy to use
//
#directory.kafka.metadata.consumer.partition.assignment.strategy =
//
// Size of metadata cache in number of entries
//
#directory.metadata.cache.size =
//
// Should we ignore the proxy settings when doing a streaming request?
//
#directory.streaming.noproxy =
/////////////////////////////////////////////////////////////////////////////////////////
//
// I N G R E S S
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Period between updates of last activity timestamps for Geo Time Series
// The value of this parameter is in ms and determines how often the directory
// will be updated when activity is being tracked.
//
#ingress.activity.window = 3600000
//
// Set to true to consider updates when tracking activity of GTS
//
#ingress.activity.update = true
//
// Set to true to consider attributes updates (calls to /meta) when tracking activity of GTS
//
#ingress.activity.meta = true
//
// Should we shuffle the Geo Time Series prior to sending the delete messages?
// It is highly recommended to leave this to 'true', otherwise deletion of many series
// may lead to regions being pounded.
//
ingress.delete.shuffle = true
//
// Set to 'true' to reject all /delete requests
//
ingress.delete.reject = false
//
// Path where the metadata cache should be dumped
//
ingress.cache.dump.path = /opt/warp10/data/cache.metadata
//
// Maximum value size of a measurement
// Make sure this is less than 'max.encoder.size'
//
ingress.value.maxsize = 65536
//
// Host onto which the ingress server should listen
//
ingress.host = 127.0.0.1
//
// Port onto which the ingress server should listen
//
ingress.port = 8882
//
// Size of metadata cache in number of entries
//
ingress.metadata.cache.size = 10000000
//
// Number of acceptors
//
ingress.acceptors = 2
//
// Number of selectors
//
ingress.selectors = 8
//
// Idle timeout
//
ingress.idle.timeout = 300000
//
// Number of threads in Jetty's Thread Pool
//
ingress.jetty.threadpool = 200
//
// Maximum size of Jetty ThreadPool queue size (unbounded by default)
//
ingress.jetty.maxqueuesize = 400
//
// Max message size for the stream update websockets
//
ingress.websocket.maxmessagesize = 1048576
//
// ZooKeeper server list
//
ingress.zk.quorum = 127.0.0.1:2181
//
// ZK Connect String for the metadata kafka cluster
//
ingress.kafka.metadata.zkconnect = 127.0.0.1:2181/zk/kafka/localhost
//
// Kafka broker list for the 'meta' topic
//
ingress.kafka.metadata.brokerlist = 127.0.0.1:9092
//
// Actual 'meta' topic
//
ingress.kafka.metadata.topic = metadata
//
// Key to use for computing MACs (128 bits in hex or OSS reference)
//
ingress.kafka.metadata.mac = hex:hhhhhh...
//
// Key to use for encrypting payloads (128/192/256 bits in hex or OSS reference)
//
ingress.kafka.metadata.aes = hex:hhhhhh...
//
// Groupid to use for consuming the 'metadata' topic
// MUST be unique to each ingress instance
//
ingress.kafka.metadata.groupid = ingress.metadata-localhost
//
// Offset reset strategy when consuming the metadata topic
// 'smallest' should be left as the default.
//
ingress.kafka.metadata.consumer.auto.offset.reset = smallest
//
// How often to commit the offsets for topic 'metadata' (in ms)
//
ingress.kafka.metadata.commitperiod = 1000
//
// Number of threads to use for consuming the 'metadata' topic
//
ingress.kafka.metadata.nthreads = 2
//
// Kafka broker list for the 'data' topic
//
ingress.kafka.data.brokerlist = 127.0.0.1:9092
//
// Actual 'data' topic
//
ingress.kafka.data.topic = data
//
// Size of Kafka Producer pool for the 'data' topic
//
ingress.kafka.data.poolsize = 2
//
// Size of Kafka Producer pool for the 'metadata' topic
//
ingress.kafka.metadata.poolsize = 2
//
// Key to use for computing MACs (128 bits in hex or OSS reference)
//
ingress.kafka.data.mac = hex:hhhhhh...
//
// Key to use for encrypting payloads (128/192/256 bits in hex or OSS reference)
//
#ingress.kafka.data.aes =
//
// Maximum message size for the 'data' topic
//
ingress.kafka.data.maxsize = 900000
//
// Maximum message size for the 'metadata' topic
//
ingress.kafka.metadata.maxsize = 900000
//
// Kafka broker list for the throttling topic
//
#ingress.kafka.throttling.brokerlist =
//
// Optional client id to use when producing messages in the throttling topic
//
#ingress.kafka.throttling.producer.clientid =
//
// Kafka producer timeout for the throttling topic
//
#ingress.kafka.throttling.request.timeout.ms =
//
// Name of the throttling topic
//
#ingress.kafka.throttling.topic =
//
// ZK connect string for the throttling kafka cluster
//
#ingress.kafka.throttling.zkconnect =
//
// Client id to use when consuming the throttling topic
//
#ingress.kafka.throttling.consumer.clientid =
//
// Group id to use when consuming the throttling topic
//
#ingress.kafka.throttling.groupid =
//
// Auto offset strategy to use when consuming the throttling topic. Set to 'largest' unless you want to do
// a special experiment.
//
#ingress.kafka.throttling.consumer.auto.offset.reset = largest
//
// Kafka client id to use for the metadata producer
//
#ingress.kafka.metadata.producer.clientid =
//
// Kafka client id to use for the metadata consumer
//
#ingress.kafka.metadata.consumer.clientid =
//
// Kafka client id to use for the data producer
//
#ingress.kafka.data.producer.clientid =
//
// Do we send Metadata in the Kafka message for delete operations?
//
#ingress.delete.metadata.include =
//
// Request timeout when talking to Kafka
//
#ingress.kafka.data.request.timeout.ms =
//
// Name of partition assignment strategy to use
//
#ingress.kafka.metadata.consumer.partition.assignment.strategy =
//
// Identification of Ingress Metadata Update endpoint source
//
#ingress.metadata.update =
/////////////////////////////////////////////////////////////////////////////////////////
//
// S T O R E
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Comma separated list of Store related HBase configuration keys. Each key will
// be set in the HBase configuration by assigning the value defined in the Warp 10 config
// under the key 'store.<HBASE_CONFIGURATION_KEY>'. Each listed HBase configuration key
// MUST have a value defined in the 'store.' prefixed configuration parameter.
//
#store.hbase.config =
//
// Throttling file path
//
#store.throttling.file =
//
// How often to reread the throttling file (in ms, defaults to 60000).
//
#store.throttling.period =
//
// How long to wait when consumption was throttled, in ns (nanoseconds), defaults to 10 ms (milliseconds)
//
#store.throttling.delay =
//
// Key for encrypting data in HBase
//
#store.hbase.data.aes =
//
// Zookeeper ZK connect string for Kafka ('data' topic)
//
store.kafka.data.zkconnect = 127.0.0.1:2181/zk/kafka/localhost
//
// Kafka broker list for the 'data' topic
//
store.kafka.data.brokerlist = 127.0.0.1:9092
//
// Actual 'data' topic
//
store.kafka.data.topic = data
//
// Key to use for computing MACs (128 bits in hex or OSS reference)
//
store.kafka.data.mac = hex:hhhhhh...
//
// Key to use for encrypting payloads (128/192/256 bits in hex or OSS reference)
//
#store.kafka.data.aes =
//
// Kafka group id with which to consume the data topic
//
store.kafka.data.groupid = store.data
//
// Delay between synchronization for offset commit
//
store.kafka.data.commitperiod = 1000
//
// Maximum time (in ms) between offset synchronizations - MUST be set to a value above that of store.kafka.data.commitperiod.
// This parameter exists to detect calls to HBase which hang, which can happen when a RegionServer dies during a call to 'batch'.
// The value of this parameter must be longer than the longest running call to HBase's 'batch' or 'delete', otherwise
// valid operations might be interrupted before they finish.
// Consider it the time it takes to detect HBase failures. Values of 60000 to 120000 are good starting points.
//
store.kafka.data.intercommits.maxtime = 120000
//
// Maximum size we allow the Puts list to grow to
//
store.hbase.data.maxpendingputssize = 1000000
//
// How many threads to spawn for consuming
// Each of these threads will commit data to HBase
//
store.nthreads = 2
//
// How many threads under each of 'store.nthreads' should consume Kafka.
// Defaults to 1 if unset.
//
store.nthreads.kafka = 1
//
// Number of threads in the pool used to process deletes. One such pool is created for each of 'store.nthreads'.
// Defaults to 0 meaning no pool is used.
//
store.nthreads.delete = 0
//
// ZooKeeper connect string for HBase
//
store.hbase.data.zkconnect = 127.0.0.1:2181
//
// HBase table where data should be stored
//
store.hbase.data.table = continuum
//
// Columns family under which data should be stored
//
store.hbase.data.colfam = v
//
// Parent znode under which HBase znodes will be created
//
store.hbase.data.znode = /zk/hbase/localhost
//
// Custom value of 'hbase.hconnection.threads.max' for the Store HBase pool
//
store.hbase.hconnection.threads.max = 4
//
// Custom value of 'hbase.hconnection.threads.core' for the Store HBase pool (MUST be <= STORE_HBASE_HCONNECTION_THREADS_MAX)
//
store.hbase.hconnection.threads.core = 4
//
// Custom value of 'hbase.rpc.timeout' (in ms) for Store HBase client, this is especially important to adapt when
// large deletes are possible.
// This value SHOULD be larger than the 'hbase.client.operation.timeout'.
//
#store.hbase.rpc.timeout =
//
// Timeout (in ms) for client operations (bulk delete, region listing, ..) in the Store HBase client. Defaults to 1200000 ms.
//
#store.hbase.client.operation.timeout =
//
// Number of times to retry RPCs in the Store HBase client. HBase default is 31.
//
#store.hbase.client.retries.number =
//
// Pause (in ms) between retries for the Store HBase client. HBase default is 100ms
//
#store.hbase.client.pause =
//
// Kafka client id to use for the data producer
//
#store.kafka.data.producer.clientid =
//
// Client id to use to consume the data topic
//
#store.kafka.data.consumer.clientid =
//
// A prefix prepended to the Kafka ConsumerId
//
#store.kafka.data.consumerid.prefix =
//
// Custom value of 'hbase.client.ipc.pool.size' for the Store HBase pool
//
#store.hbase.client.ipc.pool.size =
//
// ZooKeeper port for HBase client
//
#store.hbase.zookeeper.property.clientPort =
//
// Name of partition assignment strategy to use
//
#store.kafka.data.consumer.partition.assignment.strategy =
/////////////////////////////////////////////////////////////////////////////////////////
//
// P L A S M A
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Maximum number of Geo Time Series a single Plasma session can subscribe to
//
#warp.plasma.maxsubs =
//
// ZooKeeper connect string for Kafka consumer
//
plasma.frontend.kafka.zkconnect = 127.0.0.1:2181/zk/kafka/localhost
//
// Kafka topic to consume. This topic is dedicated to this Plasma frontend.
// One topic per instance of plasma frontend.
//
plasma.frontend.kafka.topic = plasmafe1
//
// Kafka groupid under which to consume above topic
//
plasma.frontend.kafka.groupid = plasmafe-1
//
// How often (in ms) to commit Kafka offsets
//
plasma.frontend.kafka.commitperiod = 1000
//
// Number of threads used for consuming Kafka topic
//
plasma.frontend.kafka.nthreads = 2
//
// Optional AES key for messages in Kafka
//
#plasma.frontend.kafka.aes =
//
// ZooKeeper connect String for subscription
//
plasma.frontend.zkconnect = 127.0.0.1:2181
//
// ZooKeeper root znode for subscriptions
//
plasma.frontend.znode = /zk/warp/localhost/plasma
//
// Maximum size of each znode (in bytes)
//
plasma.frontend.maxznodesize = 100000
//
// Host/IP on which to bind
//
plasma.frontend.host = 127.0.0.1
//
// Port on which to listen
//
plasma.frontend.port = 8884
//
// Number of acceptors
//
plasma.frontend.acceptors = 2
//
// Number of selectors
//
plasma.frontend.selectors = 4
//
// Max message size for the Plasma Frontend Websocket
//
plasma.frontend.websocket.maxmessagesize = 1048576
//
// Idle timeout
//
plasma.frontend.idle.timout = 300000
//
// SipHash key for computing MACs of Kafka messages
//
plasma.frontend.kafka.mac = hex:hhhh...
plasma.frontend.subscribe.delay = 5000
//
// Zookeeper ZK connect string for Kafka ('in' topic)
//
plasma.backend.kafka.in.zkconnect = 127.0.0.1:2181/zk/kafka/localhost
//
// Actual 'in' topic
//
plasma.backend.kafka.in.topic = data
//
// Key to use for computing MACs (128 bits in hex or OSS reference)
//
plasma.backend.kafka.in.mac = hex:hhhhhh...
//
// Key to use for encrypting payloads (128/192/256 bits in hex or OSS reference)
//
#plasma.backend.kafka.in.aes =
//
// Kafka group id with which to consume the in topic
//
plasma.backend.kafka.in.groupid = plasmabeIn-1
//
// Delay between synchronization for offset commit
//
plasma.backend.kafka.in.commitperiod = 1000
//
// Number of threads to run for reading off of Kafka
//
plasma.backend.kafka.in.nthreads = 2
//
// Kafka broker list for the 'out' topic
//
plasma.backend.kafka.out.brokerlist = 127.0.0.1:9092
//
// Maximum size of Kafka outward messages
//
plasma.backend.kafka.out.maxsize = 500000
//
// Key to use for computing MACs (128 bits in hex or OSS reference)
//
plasma.backend.kafka.out.mac = hex:hhhhhh...
//
// Key to use for encrypting payloads (128/192/256 bits in hex or OSS reference)
//
#plasma.backend.kafka.out.aes =
//
// ZooKeeper Quorum for the ZK ensemble to use for retrieving subscriptions
//
plasma.backend.subscriptions.zkconnect = 127.0.0.1:2181
//
// Parent znode under which subscription znodes will be created
//
plasma.backend.subscriptions.znode = /zk/warp/localhost/plasma
//
// Kafka client id to use for the data consumer
//
//plasma.frontend.kafka.consumer.clientid =
//
// Kafka client id to use for the data consumer
//
//plasma.backend.kafka.in.consumer.clientid =
//
// Kafka client id to use for the data producer towards the frontends
//
//plasma.backend.kafka.out.producer.clientid =
//
// Name of partition assignment strategy to use
//
//plasma.backend.kafka.in.consumer.partition.assignment.strategy =
//
// Name of partition assignment strategy to use
//
//plasma.frontend.kafka.consumer.partition.assignment.strategy =
/////////////////////////////////////////////////////////////////////////////////////////
//
// R U N N E R
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Set to true to run each script once at startup. Set to false to schedule the
// first run at the next timestamp which is congruent to 0 modulo the period of the
// script.
runner.runatstartup = true
//
// ZooKeeper connect string for the leader election among schedulers
//
runner.zk.quorum = 127.0.0.1:2181
//
// Znode to use for the leader election among schedulers
//
runner.zk.znode = /zk/warp/localhost/runner
//
// String uniquely identifying this instance of ScriptRunner
//
runner.id = runner-localhost-1
//
// Roles of the ScriptRunner instance. Can either be 'standalone' or any combination of 'scheduler' and 'worker'.
//
runner.roles = standalone
//
// Root directory under which scripts to run reside. The scripts MUST have a '.mc2' extension
// and reside in subdirectories of this root directory whose name is the periodicity (in ms) at
// which to run them.
//
#runner.root =
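//
// Example (hypothetical path): with the setting below, a script stored as
// /opt/warp10/warpscripts/60000/cleanup.mc2 would be scheduled every 60000 ms
// runner.root = /opt/warp10/warpscripts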
//
// Number of threads to use for running scripts.
//
runner.nthreads = 1
//
// How often (in ms) to scan RUNNER_ROOT for new scripts
//
runner.scanperiod = 60000
//
// warpscript endpoint to use for executing the scripts
//
runner.endpoint = http://127.0.0.1:8881/api/v0/exec/warpscript
//
// AES key for wrapping a runner nonce which can later be extracted using RUNNERNONCE
//
//runner.psk = hex:......
//
// Minimum period at which a script can be scheduled. Any script scheduled
// more often than that won't be run
//
runner.minperiod = 1000
//
// ZooKeeper connect string for the Kafka cluster
//
runner.kafka.zkconnect = 127.0.0.1:2181/zk/kafka/localhost
//
// List of Kafka brokers
//
runner.kafka.brokerlist = 127.0.0.1:9092
//
// Size of Kafka producer pool
//
runner.kafka.poolsize = 2
//
// Topic to use to submit the scripts
//
runner.kafka.topic = runner
//
// Groupid to use when consuming scripts
//
runner.kafka.groupid = runner
//
// Number of threads to spawn to consume scripts
//
runner.kafka.nthreads = 1
//
// Commit period for the script topic
//
runner.kafka.commitperiod = 1000
//
// Key for integrity checks
//
runner.kafka.mac = hex:hhhhhh...
//
// Key for encryption of scripts on topic
//
#runner.kafka.aes =
//
// Kafka client id to use for the producer
//
//runner.kafka.producer.clientid =
//
// Kafka client id to use for the consumer
//
//runner.kafka.consumer.clientid =
//
// Name of partition assignment strategy to use
//
//runner.kafka.consumer.partition.assignment.strategy =
/////////////////////////////////////////////////////////////////////////////////////////
//
// E G R E S S
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Should the egress exec handler expose its store/directory clients?
//
egress.clients.expose = false
//
// Comma separated list of Egress related HBase configuration keys. Each key will
// be set in the HBase configuration by assigning the value defined in the Warp 10 config
// under the key 'egress.<HBASE_CONFIGURATION_KEY>'. Each listed HBase configuration key
// MUST have a value defined in the 'egress.' prefixed configuration parameter.
//
#egress.hbase.config =
//
// Port onto which the egress server should listen
//
egress.port = 8881
//
// Host onto which the egress server should listen
//
egress.host = 127.0.0.1
//
// Number of acceptors
//
egress.acceptors = 2
//
// Number of selectors
//
egress.selectors = 8
//
// Idle timeout
//
egress.idle.timeout = 300000
//
// ZooKeeper server list
//
egress.zk.quorum = 127.0.0.1:2181
//
// Key to use for encrypting data in HBase (128/192/256 bits in hex or OSS reference)
//
#egress.hbase.data.aes =
//
// Columns family under which data should be stored
//
egress.hbase.data.colfam = v
//
// HBase table where data should be stored
//
egress.hbase.data.table = continuum
//
// ZooKeeper Quorum for locating HBase
//
egress.hbase.data.zkconnect = 127.0.0.1:2181
//
// Parent znode under which HBase znodes will be created
//
egress.hbase.data.znode = /zk/hbase/localhost
//
// Threshold in number of GTS above which block caching will be disabled
//
egress.hbase.data.blockcache.gts.threshold = 1024
//
// Number of GTS to batch when retrieving datapoints (to mitigate responseTooSlow errors)
//
egress.fetch.batchsize = 10000
//
// Boolean indicating whether or not to use the HBase filter when retrieving rows.
//
egress.hbase.filter = true
//
// Key to use for encrypting GTSSplit instances
//
#egress.fetcher.aes =
//
// Maximum age of a valid GTSSplit (in ms)
//
#egress.fetcher.maxsplitage =
//
// Custom value of 'hbase.client.ipc.pool.size' for the Egress HBase pool
//
#egress.hbase.client.ipc.pool.size =
//
// Custom scanner lease period
//
#egress.hbase.client.scanner.timeout.period =
//
// GTS count threshold above which the filter will be used
//
#egress.hbase.filter.threshold =
//
// Custom value for RPC timeout
//
#egress.hbase.rpc.timeout =
//
// ZooKeeper port for HBase client
//
#egress.hbase.zookeeper.property.clientPort =
//
// Custom value of 'hbase.client.max.perserver.tasks', defaults to 2
//
#egress.hbase.client.max.perserver.tasks =
//
// Custom value of 'hbase.client.max.perregion.tasks', defaults to 1
//
#egress.hbase.client.max.perregion.tasks =
//
// Custom value of 'hbase.client.max.total.tasks', defaults to 100
//
#egress.hbase.client.max.total.tasks =
//
// Number of threads to use for scheduling parallel scanners. Use 0 to disable parallel scanners
//
#egress.hbase.parallelscanners.poolsize =
//
// Maximum number of parallel scanners per fetch request. Use 0 to disable parallel scanners.
//
#egress.hbase.parallelscanners.maxinflightperrequest =
//
// Minimum number of GTS to assign to a parallel scanner. If the number of GTS to fetch is below this limit, no
// parallel scanners will be spawned. Defaults to 4.
//
#egress.hbase.parallelscanners.min.gts.perscanner =
//
// Maximum number of parallel scanners to use when fetching datapoints for a batch of GTS (see EGRESS_FETCH_BATCHSIZE).
// Defaults to 16.
//
#egress.hbase.parallelscanners.max.parallel.scanners =
/////////////////////////////////////////////////////////////////////////////////////////
//
// T H R O T T L I N G M A N A G E R
//
/////////////////////////////////////////////////////////////////////////////////////////
//
// Root directory where throttle files are stored
//
throttling.manager.dir = /opt/warp10/etc/throttle
//
// Period (in ms) between two scans of the THROTTLING_MANAGER_DIR
//
throttling.manager.period = 1000
//
// Ramp up period (in ms) during which we do not push the estimators to Sensision.
// This period (in ms) should be greater than the period at which the throttling files
// are updated, so we get a chance to have a merged estimator pushed to us even when
// we just restarted.
//
//throttling.manager.rampup = 120000
//
// Maximum number of estimators we cache in memory
//
//throttling.manager.estimator.cache.size = 10000
//
// Default value for the rate when not configured through a file
//
//throttling.manager.rate.default = 0.0
//
// Default value for the mads when not configured through a file
//
//throttling.manager.mads.default = 0
//
// Default value for the maxwait timeout
//
//throttling.manager.maxwait.default =
//
// Header containing the request UUID when calling the endpoint
//
#http.header.webcall.uuid =
//
// HTTP Header for elapsed time of warpscript scripts
//
#http.header.elapsed =
//
// Script line where an error was encountered
//
#http.header.error.line =
//
// Message for the error that was encountered
//
#http.header.error.message =
//
// HTTP Header for access tokens
//
#http.header.token =
//
// HTTP Header to provide the token for outgoing META requests
//
#http.header.token.META =
//
// HTTP Header to provide the token for outgoing UPDATE requests
//
#http.header.token.UPDATE =
//
// HTTP Header to provide the token for outgoing DELETE requests
//
#http.header.token.DELETE =
//
// HTTP Header for setting the base timestamp for relative timestamps
//
#http.header.now =
//
// Name of header containing the signature of the token used for the fetch
//
#http.header.fetch.signature =
//
// Name of header containing the signature of the token used for the update
//
#http.header.update.signature =
//
// Name of header containing the signature of streaming directory requests
//
#http.header.directory.signature =
//
// Name of header containing the name of the symbol in which to expose the request headers
//
#http.header.exposeheaders =
//
// HTTP Header for number of datapoints fetched during a script invocation
//
#http.header.fetched =
//
// HTTP Header for number of ops performed in a script invocation
//
#http.header.ops =
//
// HTTP Header to specify if we should show errors in /sfetch responses
//
#http.header.showerrors =
//
// HTTP Header for specifying the timespan in /sfetch requests
//
#http.header.timespan =
//
// HTTP Plugin
//
// IP the HTTP plugin will listen on
#http.host = 127.0.0.1
// Port the HTTP plugin will listen on
#http.port = 10080
// Number of Jetty acceptors to use (defaults to 2)
#http.acceptors = 2
// Number of Jetty selectors to use (defaults to 4)
#http.selectors = 4
// Number of threads Jetty should use (defaults to 1 + acceptors + acceptors * selectors which is the minimum)
#http.maxthreads = 11
// Timeout (in ms) when expecting a HTTP request (defaults to 30000 ms)
#http.idle.timeout = 30000
// Directory where spec files are located
#http.dir =
// Period (in ms) at which to scan 'http.dir' (defaults to 60s)
#http.period = 60000
// Size of the queue to use for the Jetty thread pool. By default no queue is used
#http.queuesize = 8
//
// UDP Plugin
//
// Directory where UDP 'spec' files will be located
#udp.dir =
// Period (in ms) at which to rescan 'udp.dir' for spec files changes (defaults to 60s)
#udp.period = 60000
//
// TCP Plugin
//
// Directory where TCP 'spec' files will be located
#tcp.dir =
// Period (in ms) at which to rescan 'tcp.dir' for spec files changes (defaults to 60s)
#tcp.scanperiod = 60000
//
// InfluxDB Plugin
//
// Port the InfluxDB plugin will listen on
#influxdb.port = 18086
// IP the InfluxDB plugin will listen on (defaults to 127.0.0.1)
#influxdb.host = 127.0.0.1
// Timeout (in ms) when expecting a HTTP request (defaults to 30000 ms)
#influxdb.idle.timeout = 30000
// Number of threads Jetty should use (defaults to 1 + acceptors + acceptors * selectors which is the minimum)
#influxdb.jetty.threadpool = 11
// Size of the queue to use for the Jetty thread pool. By default no queue is used
#influxdb.jetty.maxqueuesize = 8
// Number of Jetty acceptors to use (defaults to 2)
#influxdb.acceptors = 2
// Number of Jetty selectors to use (defaults to 4)
#influxdb.selectors = 4
// Warp 10 instance update endpoint to push the data to
#influxdb.warp10.endpoint = http://ip:port/api/v0/update
// Default token used when pushing data to the specified Warp 10 instance. By default the password provided in the request is used as the token.
#influxdb.default.token =
//
// URLFETCH extension
//
// Maximum number of URLFETCH calls, 64 by default for soft and hard
#warpscript.urlfetch.limit =
#warpscript.urlfetch.limit.hard =
// Maximum downloaded bytes by URLFETCH, 1000000 by default for soft and hard
#warpscript.urlfetch.maxsize =
#warpscript.urlfetch.maxsize.hard =
// List of patterns to include/exclude for hosts, works the same way as webcall.host.patterns. Defaults to the value of webcall.host.patterns.
#warpscript.urlfetch.host.patterns =