modify the configuration to match this example #293 (comment)
jvshahid committed Mar 24, 2014
1 parent 7e957ec commit 1043101
Showing 7 changed files with 74 additions and 48 deletions.
12 changes: 7 additions & 5 deletions config.toml.sample
@@ -23,11 +23,13 @@ port = 8086 # binding is disabled if the port isn't set
# ssl-port = 8084 # Ssl support is enabled if you set a port and cert
# ssl-cert = /path/to/cert.pem

-[graphite]
-# optionally enable a graphite (carbon) compatible ingestion
-enabled = false
-port = 2003
-database = "" # store graphite data in this database
+[input_plugins]
+
+# Configure the graphite api
+[input_plugins.graphite]
+enabled = false
+# port = 2003
+# database = "" # store graphite data in this database

# Raft configuration
[raft]
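
For anyone updating an existing config by hand, here is a minimal decode sketch (not part of this commit): it assumes the github.com/BurntSushi/toml decoder and Go structs modeled on the ones this commit changes in configuration.go below, with simplified field types.

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // Structs modeled on this commit's configuration.go changes (simplified).
    type GraphiteConfig struct {
        Enabled  bool
        Port     int
        Database string
    }

    type InputPlugins struct {
        Graphite GraphiteConfig `toml:"graphite"`
    }

    type TomlConfiguration struct {
        InputPlugins InputPlugins `toml:"input_plugins"`
    }

    // The new shape from config.toml.sample above, with the plugin enabled.
    const sample = `
    [input_plugins]

    [input_plugins.graphite]
    enabled = true
    port = 2003
    database = "graphite_db"
    `

    func main() {
        var config TomlConfiguration
        if _, err := toml.Decode(sample, &config); err != nil {
            panic(err)
        }
        // The [input_plugins.graphite] table lands on the nested struct.
        fmt.Println(config.InputPlugins.Graphite.Enabled)  // true
        fmt.Println(config.InputPlugins.Graphite.Port)     // 2003
        fmt.Println(config.InputPlugins.Graphite.Database) // graphite_db
    }

BurntSushi/toml matches keys to struct fields case-insensitively, so the lower-case TOML keys need no extra tags here.
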
18 changes: 13 additions & 5 deletions src/configuration/config.toml
@@ -1,7 +1,7 @@
# Welcome to the InfluxDB configuration file.

-# If hostname (on the OS) doesn't return a name that can be resolved by the other
-# systems in the cluster, you'll have to set the hostname to an IP or something
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
# that can be resovled here.
# hostname = ""

@@ -20,6 +20,14 @@ assets = "./admin"
ssl-port = 8087 # Ssl support is enabled if you set a port and cert
ssl-cert = "../cert.pem"

+[input_plugins]
+
+# Configure the graphite api
+[input_plugins.graphite]
+enabled = false
+port = 2003
+database = "" # store graphite data in this database
+
# Raft configuration
[raft]
# The raft port should be open between all servers in a cluster.
@@ -76,12 +84,12 @@ lru-cache-size = "200m"
# files. max-open-files is per shard so this * that will be max.
# max-open-shards = 0

-# These options specify how data is sharded across the cluster. There are two
+# These options specify how data is sharded across the cluster. There are two
# shard configurations that have the same knobs: short term and long term.
# Any series that begins with a capital letter like Exceptions will be written
# into the long term storage. Any series beginning with a lower case letter
# like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple
+# can write high precision data into short term and drop it after a couple
# of days. Meanwhile, continuous queries can run downsampling on the short term
# data and write into the long term area.
[sharding]
@@ -96,7 +104,7 @@ lru-cache-size = "200m"
# over the network when doing a query.
duration = "7d"

-# split will determine how many shards to split each duration into. For example,
+# split will determine how many shards to split each duration into. For example,
# if we created a shard for 2014-02-10 and split was set to 2. Then two shards
# would be created that have the data for 2014-02-10. By default, data will
# be split into those two shards deterministically by hashing the (database, serise)
50 changes: 27 additions & 23 deletions src/configuration/configuration.go
@@ -93,10 +93,10 @@ type LoggingConfig struct {
}

type LevelDbConfiguration struct {
-MaxOpenFiles int `toml:"max-open-files"`
-LruCacheSize size `toml:"lru-cache-size"`
-MaxOpenShards int `toml:"max-open-shards"`
-PointBatchSize int `toml:"point-batch-size"`
+MaxOpenFiles int `toml:"max-open-files"`
+LruCacheSize size `toml:"lru-cache-size"`
+MaxOpenShards int `toml:"max-open-shards"`
+PointBatchSize int `toml:"point-batch-size"`
}

type ShardingDefinition struct {
@@ -160,19 +160,23 @@ type WalConfig struct {
RequestsPerLogFile int `toml:"requests-per-log-file"`
}

+type InputPlugins struct {
+Graphite GraphiteConfig `toml:"graphite"`
+}
+
type TomlConfiguration struct {
-Admin AdminConfig
-Api ApiConfig
-Graphite GraphiteConfig
-Raft RaftConfig
-Storage StorageConfig
-Cluster ClusterConfig
-Logging LoggingConfig
-LevelDb LevelDbConfiguration
-Hostname string
-BindAddress string `toml:"bind-address"`
-Sharding ShardingDefinition `toml:"sharding"`
-WalConfig WalConfig `toml:"wal"`
+Admin AdminConfig
+HttpApi ApiConfig `toml:"api"`
+InputPlugins InputPlugins `toml:"input_plugins"`
+Raft RaftConfig
+Storage StorageConfig
+Cluster ClusterConfig
+Logging LoggingConfig
+LevelDb LevelDbConfiguration
+Hostname string
+BindAddress string `toml:"bind-address"`
+Sharding ShardingDefinition `toml:"sharding"`
+WalConfig WalConfig `toml:"wal"`
}

type Configuration struct {
@@ -198,7 +202,7 @@ type Configuration struct {
LevelDbMaxOpenFiles int
LevelDbLruCacheSize int
LevelDbMaxOpenShards int
-LevelDbPointBatchSize int
+LevelDbPointBatchSize int
ShortTermShard *ShardConfiguration
LongTermShard *ShardConfiguration
ReplicationFactor int
@@ -256,12 +260,12 @@ func parseTomlConfiguration(filename string) (*Configuration, error) {
config := &Configuration{
AdminHttpPort: tomlConfiguration.Admin.Port,
AdminAssetsDir: tomlConfiguration.Admin.Assets,
-ApiHttpPort: tomlConfiguration.Api.Port,
-ApiHttpCertPath: tomlConfiguration.Api.SslCertPath,
-ApiHttpSslPort: tomlConfiguration.Api.SslPort,
-GraphiteEnabled: tomlConfiguration.Graphite.Enabled,
-GraphitePort: tomlConfiguration.Graphite.Port,
-GraphiteDatabase: tomlConfiguration.Graphite.Database,
+ApiHttpPort: tomlConfiguration.HttpApi.Port,
+ApiHttpCertPath: tomlConfiguration.HttpApi.SslCertPath,
+ApiHttpSslPort: tomlConfiguration.HttpApi.SslPort,
+GraphiteEnabled: tomlConfiguration.InputPlugins.Graphite.Enabled,
+GraphitePort: tomlConfiguration.InputPlugins.Graphite.Port,
+GraphiteDatabase: tomlConfiguration.InputPlugins.Graphite.Database,
RaftServerPort: tomlConfiguration.Raft.Port,
RaftDir: tomlConfiguration.Raft.Dir,
ProtobufPort: tomlConfiguration.Cluster.ProtobufPort,
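
One hedged side note, not part of this commit: after the rename, a config that still carries a top-level [graphite] table decodes without error; the table is simply ignored. Assuming the github.com/BurntSushi/toml package, its MetaData.Undecoded() result can surface such stale keys:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // Minimal stand-in for the renamed configuration (field types simplified).
    type Config struct {
        InputPlugins struct {
            Graphite struct {
                Enabled bool
            } `toml:"graphite"`
        } `toml:"input_plugins"`
    }

    // A pre-rename config: [graphite] no longer matches any struct field.
    const stale = `
    [graphite]
    enabled = true
    `

    func main() {
        var c Config
        md, err := toml.Decode(stale, &c)
        if err != nil {
            panic(err)
        }
        // Undecoded lists keys that matched nothing -- a cheap way to warn
        // users whose configs predate the move under [input_plugins].
        for _, key := range md.Undecoded() {
            fmt.Println("ignored config key:", key.String())
        }
        fmt.Println(c.InputPlugins.Graphite.Enabled) // false: old table ignored
    }
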
4 changes: 4 additions & 0 deletions src/configuration/configuration_test.go
@@ -34,6 +34,10 @@ func (self *LoadConfigurationSuite) TestConfig(c *C) {
c.Assert(config.ApiHttpCertPath, Equals, "../cert.pem")
c.Assert(config.ApiHttpPortString(), Equals, "")

+c.Assert(config.GraphiteEnabled, Equals, false)
+c.Assert(config.GraphitePort, Equals, 2003)
+c.Assert(config.GraphiteDatabase, Equals, "")
+
c.Assert(config.RaftDir, Equals, "/tmp/influxdb/development/raft")
c.Assert(config.RaftServerPort, Equals, 8090)

18 changes: 13 additions & 5 deletions src/integration/test_config1.toml
@@ -1,7 +1,7 @@
# Welcome to the InfluxDB configuration file.

-# If hostname (on the OS) doesn't return a name that can be resolved by the other
-# systems in the cluster, you'll have to set the hostname to an IP or something
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
# that can be resovled here.
# hostname = ""

@@ -21,6 +21,14 @@ port = 60500
ssl-port = 60503
ssl-cert = "./cert.pem"

+[input_plugins]
+
+# Configure the graphite api
+[input_plugins.graphite]
+enabled = true
+port = 60513
+database = "graphite_db" # store graphite data in this database
+
# Raft configuration
[raft]
# The raft port should be open between all servers in a cluster.
@@ -62,12 +70,12 @@ write-buffer-size = 1000
# This setting determines how many responses can be buffered in memory per shard before data starts gettind dropped.
query-shard-buffer-size = 500

-# These options specify how data is sharded across the cluster. There are two
+# These options specify how data is sharded across the cluster. There are two
# shard configurations that have the same knobs: short term and long term.
# Any series that begins with a capital letter like Exceptions will be written
# into the long term storage. Any series beginning with a lower case letter
# like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple
+# can write high precision data into short term and drop it after a couple
# of days. Meanwhile, continuous queries can run downsampling on the short term
# data and write into the long term area.
[sharding]
@@ -82,7 +90,7 @@ query-shard-buffer-size = 500
# over the network when doing a query.
duration = "1h"

-# split will determine how many shards to split each duration into. For example,
+# split will determine how many shards to split each duration into. For example,
# if we created a shard for 2014-02-10 and split was set to 2. Then two shards
# would be created that have the data for 2014-02-10. By default, data will
# be split into those two shards deterministically by hashing the (database, serise)
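
This test config turns the graphite listener on at port 60513, so an integration test can feed it metrics over carbon's plaintext protocol: one "name value unix-timestamp" line per metric over TCP. A minimal client sketch (hypothetical, not part of this commit; host and metric name are illustrative):

    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // Port 60513 comes from test_config1.toml above.
        conn, err := net.Dial("tcp", "localhost:60513")
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        // Carbon plaintext protocol: "<metric path> <value> <unix timestamp>\n".
        // With database = "graphite_db", the series should land in that database.
        fmt.Fprintf(conn, "servers.host01.cpu 42.5 %d\n", time.Now().Unix())
    }
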
10 changes: 5 additions & 5 deletions src/integration/test_config2.toml
@@ -1,7 +1,7 @@
# Welcome to the InfluxDB configuration file.

-# If hostname (on the OS) doesn't return a name that can be resolved by the other
-# systems in the cluster, you'll have to set the hostname to an IP or something
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
# that can be resovled here.
# hostname = ""

@@ -60,12 +60,12 @@ write-buffer-size = 1000
# This setting determines how many responses can be buffered in memory per shard before data starts gettind dropped.
query-shard-buffer-size = 500

-# These options specify how data is sharded across the cluster. There are two
+# These options specify how data is sharded across the cluster. There are two
# shard configurations that have the same knobs: short term and long term.
# Any series that begins with a capital letter like Exceptions will be written
# into the long term storage. Any series beginning with a lower case letter
# like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple
+# can write high precision data into short term and drop it after a couple
# of days. Meanwhile, continuous queries can run downsampling on the short term
# data and write into the long term area.
[sharding]
@@ -80,7 +80,7 @@ query-shard-buffer-size = 500
# over the network when doing a query.
duration = "1h"

-# split will determine how many shards to split each duration into. For example,
+# split will determine how many shards to split each duration into. For example,
# if we created a shard for 2014-02-10 and split was set to 2. Then two shards
# would be created that have the data for 2014-02-10. By default, data will
# be split into those two shards deterministically by hashing the (database, serise)
10 changes: 5 additions & 5 deletions src/integration/test_config3.toml
@@ -1,7 +1,7 @@
# Welcome to the InfluxDB configuration file.

-# If hostname (on the OS) doesn't return a name that can be resolved by the other
-# systems in the cluster, you'll have to set the hostname to an IP or something
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
# that can be resovled here.
# hostname = ""

@@ -60,12 +60,12 @@ write-buffer-size = 1000
# This setting determines how many responses can be buffered in memory per shard before data starts gettind dropped.
query-shard-buffer-size = 500

-# These options specify how data is sharded across the cluster. There are two
+# These options specify how data is sharded across the cluster. There are two
# shard configurations that have the same knobs: short term and long term.
# Any series that begins with a capital letter like Exceptions will be written
# into the long term storage. Any series beginning with a lower case letter
# like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple
+# can write high precision data into short term and drop it after a couple
# of days. Meanwhile, continuous queries can run downsampling on the short term
# data and write into the long term area.
[sharding]
@@ -80,7 +80,7 @@ query-shard-buffer-size = 500
# over the network when doing a query.
duration = "1h"

-# split will determine how many shards to split each duration into. For example,
+# split will determine how many shards to split each duration into. For example,
# if we created a shard for 2014-02-10 and split was set to 2. Then two shards
# would be created that have the data for 2014-02-10. By default, data will
# be split into those two shards deterministically by hashing the (database, serise)
