Skip to content

Commit

Permalink
Add vendoring & Upgrade libbeat to 6.4.1 (#10)
Browse files Browse the repository at this point in the history
  • Loading branch information
boernd committed Sep 28, 2018
1 parent 885b2bb commit c83341c
Show file tree
Hide file tree
Showing 5,919 changed files with 496,516 additions and 806,625 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
803 changes: 803 additions & 0 deletions Gopkg.lock

Large diffs are not rendered by default.

59 changes: 59 additions & 0 deletions Gopkg.toml
@@ -0,0 +1,59 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true


[[constraint]]
name = "github.com/elastic/beats"
version = "v6.4.1"

# libbeat uses an older version of this library (transitive dep)
[[override]]
name = "github.com/rcrowley/go-metrics"
revision = "4b2ddcb0bf13dce7963e46b97826d2695ed2f614"

[[constraint]]
name = "github.com/golang/protobuf"
version = "1.2.0"

[[constraint]]
branch = "master"
name = "github.com/golang/snappy"

[[constraint]]
name = "github.com/hashicorp/go-version"
version = "1.0.0"

[[constraint]]
name = "github.com/prometheus/prometheus"
version = "2.4.2"

[prune]
go-tests = true
unused-packages = true

[[prune.project]]
name = "github.com/elastic/beats"
unused-packages = false
25 changes: 13 additions & 12 deletions README.md
Expand Up @@ -27,25 +27,26 @@ Example Prometheusbeat event:

```
{
"@timestamp": "2018-06-12T10:33:28.122Z",
"@timestamp": "2018-09-28T11:44:07.006Z",
"@metadata": {
"beat": "prometheusbeat",
"type": "doc",
"version": "7.0.0-alpha1"
"version": "6.4.1"
},
"labels": {
"job": "prometheus",
"__name__": "scrape_samples_post_metric_relabeling",
"instance": "localhost:9090"
"le": "10",
"name": "prometheus_tsdb_tombstone_cleanup_seconds_bucket",
"instance": "localhost:9090",
"job": "prometheus"
},
"value": 0,
"host": {
"name": "example.com"
},
"value": 349,
"beat": {
"version": "6.4.1",
"name": "prometheusbeat",
"hostname": "example.com",
"version": "7.0.0-alpha1"
},
"host": {
"name": "prometheusbeat"
"hostname": "example.com"
}
}
```
Expand Down Expand Up @@ -164,4 +165,4 @@ The beat frameworks provides tools to crosscompile and package your beat for dif
make package
```

This will fetch and create all images required for the build process. The hole process to finish can take several minutes.
This will fetch and create all images required for the build process. The whole process can take several minutes to finish.
5 changes: 0 additions & 5 deletions _meta/beat.yml
Expand Up @@ -5,8 +5,3 @@
prometheusbeat:
listen: ":8080"
context: "/prometheus"
# The storage request format had a breaking change starting with Prometheus 1.7.
# Set the version accordingly.
# 1: Prometheus < 1.7
# 2: Prometheus >= 1.7
version: 2
2 changes: 1 addition & 1 deletion _meta/kibana.generated/6/index-pattern/prometheusbeat.json
Expand Up @@ -12,5 +12,5 @@
"version": 1
}
],
"version": "7.0.0-alpha1"
"version": "6.4.0"
}
19 changes: 18 additions & 1 deletion include/fields.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion main_test.go
Expand Up @@ -6,7 +6,7 @@ import (
"flag"
"testing"

"github.com/boernd/mybeat/cmd"
"github.com/infonova/prometheusbeat/cmd"
)

var systemTest *bool
Expand Down
126 changes: 114 additions & 12 deletions prometheusbeat.reference.yml
Expand Up @@ -5,11 +5,6 @@
prometheusbeat:
listen: ":8080"
context: "/prometheus"
# The storage request format had a breaking change starting with Prometheus 1.7.
# Set the version accordingly.
# 1: Prometheus < 1.7
# 2: Prometheus >= 1.7
version: 2

#================================ General ======================================

Expand Down Expand Up @@ -242,6 +237,9 @@ output.elasticsearch:
# Set gzip compression level.
#compression_level: 0

# Configure escaping html symbols in strings.
#escape_html: true

# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
Expand Down Expand Up @@ -282,7 +280,18 @@ output.elasticsearch:
# The default is 50.
#bulk_max_size: 50

# Configure http request timeout before failing an request to Elasticsearch.
# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s

# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s

# Configure http request timeout before failing a request to Elasticsearch.
#timeout: 90

# Use SSL settings for HTTPS.
Expand Down Expand Up @@ -336,6 +345,9 @@ output.elasticsearch:
# Set gzip compression level.
#compression_level: 3

# Configure escaping html symbols in strings.
#escape_html: true

# Optional maximum time to live for a connection to Logstash, after which the
# connection will be re-established. A value of `0s` (the default) will
# disable this feature.
Expand All @@ -346,7 +358,7 @@ output.elasticsearch:
# Optional load balance the events between the Logstash hosts. Default is false.
#loadbalance: false

# Number of batches to be sent asynchronously to logstash while processing
# Number of batches to be sent asynchronously to Logstash while processing
# new batches.
#pipelining: 2

Expand All @@ -355,6 +367,17 @@ output.elasticsearch:
# if no error is encountered.
#slow_start: false

# The number of seconds to wait before trying to reconnect to Logstash
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s

# The maximum number of seconds to wait before attempting to connect to
# Logstash after a network error. The default is 60s.
#backoff.max: 60s

# Optional index name. The default index name is set to prometheusbeat
# in all lowercase.
#index: 'prometheusbeat'
Expand Down Expand Up @@ -401,6 +424,21 @@ output.elasticsearch:
# never, once, and freely. Default is never.
#ssl.renegotiation: never

# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
# and retry until all events are published. Set max_retries to a value less
# than 0 to retry until all events are published. The default is 3.
#max_retries: 3

# The maximum number of events to bulk in a single Logstash request. The
# default is 2048.
#bulk_max_size: 2048

# The number of seconds to wait for responses from the Logstash server before
# timing out. The default is 30s.
#timeout: 30s

#------------------------------- Kafka output ----------------------------------
#output.kafka:
# Boolean flag to enable or disable the output module.
Expand Down Expand Up @@ -436,9 +474,16 @@ output.elasticsearch:
#username: ''
#password: ''

# Kafka version prometheusbeat is assumed to run against. Defaults to the oldest
# supported stable version (currently version 0.8.2.0)
#version: 0.8.2
# Kafka version prometheusbeat is assumed to run against. Defaults to "1.0.0".
#version: '1.0.0'

# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false

# Configure escaping html symbols in strings.
#escape_html: true

# Metadata update configuration. Metadata contains leader information
# deciding which broker to use when publishing.
Expand Down Expand Up @@ -486,6 +531,10 @@ output.elasticsearch:
# default is gzip.
#compression: gzip

# Set the compression level. Currently only gzip provides a compression level
# between 0 and 9. The default value is chosen by the compression algorithm.
#compression_level: 4

# The maximum permitted size of JSON-encoded messages. Bigger messages will be
# dropped. The default value is 1000000 (bytes). This value should be equal to
# or less than the broker's message.max.bytes.
Expand Down Expand Up @@ -542,6 +591,14 @@ output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true

# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false

# Configure escaping html symbols in strings.
#escape_html: true

# The list of Redis servers to connect to. If load balancing is enabled, the
# events are distributed to the servers in the list. If one server becomes
# unreachable, the events are distributed to the reachable servers only.
Expand Down Expand Up @@ -589,6 +646,17 @@ output.elasticsearch:
# until all events are published. The default is 3.
#max_retries: 3

# The number of seconds to wait before trying to reconnect to Redis
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s

# The maximum number of seconds to wait before attempting to connect to
# Redis after a network error. The default is 60s.
#backoff.max: 60s

# The maximum number of events to bulk in a single Redis request or pipeline.
# The default is 2048.
#bulk_max_size: 2048
Expand Down Expand Up @@ -643,6 +711,14 @@ output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true

# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false

# Configure escaping html symbols in strings.
#escape_html: true

# Path to the directory where to save the generated files. The option is
# mandatory.
#path: "/tmp/prometheusbeat"
Expand Down Expand Up @@ -670,8 +746,13 @@ output.elasticsearch:
# Boolean flag to enable or disable the output module.
#enabled: true

# Pretty print json event
#pretty: false
# Configure JSON encoding
#codec.json:
# Pretty print json event
#pretty: false

# Configure escaping html symbols in strings.
#escape_html: true

#================================= Paths ======================================

Expand Down Expand Up @@ -769,6 +850,13 @@ output.elasticsearch:
# Path to fields.yml file to generate the template
#setup.template.fields: "${path.config}/fields.yml"

# A list of fields to be added to the template and Kibana index pattern. Also
# specify setup.template.overwrite: true to overwrite the existing template.
# This setting is experimental.
#setup.template.append_fields:
#- name: field_name
# type: field_type

# Enable json template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false

Expand Down Expand Up @@ -955,6 +1043,17 @@ logging.files:
# The default is 50.
#bulk_max_size: 50

# The number of seconds to wait before trying to reconnect to Elasticsearch
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
#backoff.init: 1s

# The maximum number of seconds to wait before attempting to connect to
# Elasticsearch after a network error. The default is 60s.
#backoff.max: 60s

# Configure http request timeout before failing a request to Elasticsearch.
#timeout: 90

Expand Down Expand Up @@ -994,6 +1093,9 @@ logging.files:
# never, once, and freely. Default is never.
#ssl.renegotiation: never

#metrics.period: 10s
#state.period: 1m

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
Expand Down

0 comments on commit c83341c

Please sign in to comment.